In [ ]:
#Libraries we need to import
import tensorflow as tf
from tensorflow.keras import layers, Sequential
from tensorflow.keras.layers import Conv2D, MaxPool2D, Dropout, Dense, Flatten
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from sklearn.preprocessing import LabelEncoder

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import seaborn as sns
%matplotlib inline

from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, classification_report

import os
import random
import warnings
import cv2
warnings.filterwarnings('ignore')

from keras import backend as K
from tensorflow.keras.optimizers import Adam,SGD,Adagrad,Adadelta,RMSprop
from tensorflow.keras.utils import to_categorical

# specifically for cnn
from keras.layers import Activation
from keras.layers import MaxPooling2D, BatchNormalization
In [ ]:
# Set image_size and batch_size
IMAGE_SIZE = (224, 224)
BATCH_SIZE = 32

# Target Directory
directory = 'C:\\Users\\jatin\\Desktop\\flowers'  # Path to your data

# Train Data
train_data = tf.keras.preprocessing.image_dataset_from_directory(
             directory,
             validation_split=0.2,
             subset='training',
             seed=123,
             image_size=IMAGE_SIZE,
             batch_size=BATCH_SIZE)

# Validation Data
valid_data = tf.keras.preprocessing.image_dataset_from_directory(
             directory,
             validation_split=0.2,
             subset='validation',
             seed=123,
             image_size=IMAGE_SIZE,
             batch_size=BATCH_SIZE)

# You can continue with the rest of the code as is, without any changes.
Found 4317 files belonging to 5 classes.
Using 3454 files for training.
Found 4317 files belonging to 5 classes.
Using 863 files for validation.
In [ ]:
labels = ['dandelion', 'daisy','tulip','sunflower','rose']
img_size = 224
def get_data(data_dir):
    data = [] 
    for label in labels: 
        path = os.path.join(data_dir, label)
        class_num = labels.index(label)
        for img in os.listdir(path):
            try:
                img_arr = cv2.imread(os.path.join(path, img), cv2.IMREAD_COLOR)
                resized_arr = cv2.resize(img_arr, (img_size, img_size)) # Reshaping images to preferred size
                data.append([resized_arr, class_num])
            except Exception as e:
                print(e)
    return np.array(data)
In [ ]:
data = get_data('C:\\Users\\jatin\\Desktop\\flowers')
In [ ]:
class_names = train_data.class_names
class_names
Out[ ]:
['daisy', 'dandelion', 'rose', 'sunflower', 'tulip']
In [ ]:
label_encode = LabelEncoder()
class_names_label_encode = label_encode.fit_transform(class_names)
class_names_label_encode
Out[ ]:
array([0, 1, 2, 3, 4], dtype=int64)
In [ ]:
fig,ax=plt.subplots(5,2)
fig.set_size_inches(15,15)
for i in range(5):
    for j in range (2):
        l=random.randint(0,len(data))
        ax[i,j].imshow(data[l][0])
        ax[i,j].set_title('Flower: '+labels[data[l][1]])
        
plt.tight_layout()
In [ ]:
img = plt.imread('C:\\Users\\jatin\\Desktop\\flowers\\rose\\12240303_80d87f77a3_n.jpg')
plt.imshow(img)
plt.title("rose")
plt.axis("off")
plt.show();
In [ ]:
img = plt.imread('C:\\Users\\jatin\\Desktop\\flowers\\daisy\\107592979_aaa9cdfe78_m.jpg')
plt.imshow(img)
plt.title("Daisy")
plt.axis("off")
plt.show();
In [ ]:
def preprocess_image(image, label, image_shape=224):
    
    img = tf.image.resize(image, [image_shape, image_shape])
    img = img/225.
    
    return tf.cast(img, tf.float32), label
In [ ]:
preprocess_image(image=img, label='tulip')
Out[ ]:
(<tf.Tensor: shape=(224, 224, 3), dtype=float32, numpy=
 array([[[1.8509637e-01, 3.4048754e-01, 8.2715422e-02],
         [1.5368481e-01, 2.9800454e-01, 5.6082767e-02],
         [1.3732994e-01, 2.6044786e-01, 4.5396827e-02],
         ...,
         [8.9172339e-03, 8.9172339e-03, 8.9172339e-03],
         [8.8888891e-03, 8.8888891e-03, 8.8888891e-03],
         [8.8888891e-03, 8.8888891e-03, 8.8888891e-03]],
 
        [[1.7320862e-01, 3.3274943e-01, 6.0827665e-02],
         [1.5613379e-01, 3.0184808e-01, 4.8939910e-02],
         [1.4887755e-01, 2.7959752e-01, 5.0158728e-02],
         ...,
         [9.5975148e-03, 9.5975148e-03, 9.5975148e-03],
         [8.4637217e-03, 8.4637217e-03, 8.4637217e-03],
         [8.4126983e-03, 8.4126983e-03, 8.4126983e-03]],
 
        [[1.8219386e-01, 3.4113947e-01, 5.5555556e-02],
         [1.7277209e-01, 3.2316327e-01, 5.1660996e-02],
         [1.7047618e-01, 3.0601475e-01, 5.5714287e-02],
         ...,
         [9.6825389e-03, 8.0952393e-03, 1.0476189e-02],
         [5.7143043e-03, 4.1270042e-03, 6.5079541e-03],
         [5.2380944e-03, 3.6507945e-03, 6.0317442e-03]],
 
        ...,
 
        [[8.0952281e-03, 4.1587319e-02, 8.8888891e-03],
         [8.0952281e-03, 4.1587319e-02, 8.8888891e-03],
         [9.3990779e-03, 4.2891171e-02, 9.5408140e-03],
         ...,
         [1.1887739e-02, 4.2715393e-02, 7.4432944e-03],
         [1.6507907e-02, 4.7619019e-02, 1.2063463e-02],
         [9.9433679e-03, 5.1615603e-02, 5.4989234e-03]],
 
        [[5.0623766e-03, 2.8237019e-02, 1.0941421e-03],
         [9.3140574e-03, 3.2488700e-02, 4.9206545e-03],
         [1.4444423e-02, 3.9036289e-02, 7.1315290e-03],
         ...,
         [1.6507907e-02, 4.7619019e-02, 1.2063463e-02],
         [1.0640655e-02, 5.2380912e-02, 6.1962102e-03],
         [8.9229094e-03, 5.3316325e-02, 4.4784658e-03]],
 
        [[4.4501144e-03, 1.4274461e-02, 5.6699746e-06],
         [5.5385744e-03, 2.7760798e-02, 6.1793963e-04],
         [1.5685933e-02, 4.0260755e-02, 6.0600936e-03],
         ...,
         [2.1269802e-02, 5.2380912e-02, 1.6825357e-02],
         [1.0300508e-02, 5.3316325e-02, 5.8560637e-03],
         [6.2369503e-04, 4.5068137e-02, 3.1184751e-04]]], dtype=float32)>,
 'tulip')
In [ ]:
# map the preprocess_image to train_data
train_data = train_data.map(map_func=preprocess_image, num_parallel_calls=tf.data.AUTOTUNE)
# shuffle the data
train_data = train_data.shuffle(buffer_size=1000).prefetch(buffer_size=tf.data.AUTOTUNE)


# map the preprocess_image to valid_data
valid_data = valid_data.map(map_func=preprocess_image, num_parallel_calls=tf.data.AUTOTUNE)
# shuffle the data
valid_data = valid_data.shuffle(buffer_size=1000).prefetch(buffer_size=tf.data.AUTOTUNE)
In [ ]:
train_data, valid_data
Out[ ]:
(<_PrefetchDataset element_spec=(TensorSpec(shape=(None, 224, 224, 3), dtype=tf.float32, name=None), TensorSpec(shape=(None,), dtype=tf.int32, name=None))>,
 <_PrefetchDataset element_spec=(TensorSpec(shape=(None, 224, 224, 3), dtype=tf.float32, name=None), TensorSpec(shape=(None,), dtype=tf.int32, name=None))>)
In [ ]:
# Set random seed
tf.random.set_seed(42)

# model 1
model_1 = Sequential([
    Conv2D(filters=32, kernel_size=4, padding='same', activation='relu',input_shape=(224,224,3)),
    MaxPool2D(2,2),
    Conv2D(filters=64, kernel_size=4, padding='same', activation='relu'),
    MaxPool2D(2,2), 
    Conv2D(filters=64, kernel_size=4, padding='same', activation='relu'),
    MaxPool2D(2,2),
    Dropout(0.5),
    Flatten(),
    Dense(len(class_names_label_encode), activation='softmax')
])

# Compile
model_1.compile(loss=tf.keras.losses.SparseCategoricalCrossentropy(),
               optimizer='adam',
               metrics=['accuracy'])

# Fit
history_1 = model_1.fit(train_data,
                       epochs=1,
                       validation_data=valid_data)
108/108 [==============================] - 172s 1s/step - loss: 1.2651 - accuracy: 0.4586 - val_loss: 1.1541 - val_accuracy: 0.5330
In [ ]:
model_1.summary()
Model: "sequential_2"
_________________________________________________________________
 Layer (type)                Output Shape              Param #   
=================================================================
 conv2d_6 (Conv2D)           (None, 224, 224, 32)      1568      
                                                                 
 max_pooling2d_6 (MaxPooling  (None, 112, 112, 32)     0         
 2D)                                                             
                                                                 
 conv2d_7 (Conv2D)           (None, 112, 112, 64)      32832     
                                                                 
 max_pooling2d_7 (MaxPooling  (None, 56, 56, 64)       0         
 2D)                                                             
                                                                 
 conv2d_8 (Conv2D)           (None, 56, 56, 64)        65600     
                                                                 
 max_pooling2d_8 (MaxPooling  (None, 28, 28, 64)       0         
 2D)                                                             
                                                                 
 dropout_6 (Dropout)         (None, 28, 28, 64)        0         
                                                                 
_________________________________________________________________
 Layer (type)                Output Shape              Param #   
=================================================================
 conv2d_6 (Conv2D)           (None, 224, 224, 32)      1568      
                                                                 
 max_pooling2d_6 (MaxPooling  (None, 112, 112, 32)     0         
 2D)                                                             
                                                                 
 conv2d_7 (Conv2D)           (None, 112, 112, 64)      32832     
                                                                 
 max_pooling2d_7 (MaxPooling  (None, 56, 56, 64)       0         
 2D)                                                             
                                                                 
 conv2d_8 (Conv2D)           (None, 56, 56, 64)        65600     
                                                                 
 max_pooling2d_8 (MaxPooling  (None, 28, 28, 64)       0         
 2D)                                                             
                                                                 
 dropout_6 (Dropout)         (None, 28, 28, 64)        0         
                                                                 
 flatten_2 (Flatten)         (None, 50176)             0         
                                                                 
 dense_4 (Dense)             (None, 5)                 250885    
                                                                 
=================================================================
Total params: 350,885
Trainable params: 350,885
Non-trainable params: 0
_________________________________________________________________
In [ ]:
dire='C:\\Users\\jatin\\Desktop\\flowers'
In [ ]:
categories=['dandelion', 'daisy', 'sunflower', 'tulip', 'rose']
features=[]
for i in categories:
    path=os.path.join(dire,i)
    num_classes=categories.index(i)
    for img in os.listdir(path):
        if img.endswith('.jpg'):
            
            img_array=cv2.imread(os.path.join(path,img),cv2.IMREAD_COLOR)
            img_array=cv2.resize(img_array,(150,150))
            features.append([img_array,num_classes])
In [ ]:
X=[]
y=[]
for i,j in features:
    X.append(i)
    y.append(j)
In [ ]:
fig,ax=plt.subplots(5,2)
fig.set_size_inches(15,15)
for i in range(5):
    for j in range (2):
        l=np.random.randint(0,len(y))
        ax[i,j].imshow(X[l])
        ax[i,j].set_title('Flower: '+categories[y[l]])
plt.axis('off')        
plt.tight_layout()
In [ ]:
X=np.array(X).reshape(-1,150,150,3)/255.0
In [ ]:
list_dandelion=len([i for i in y if i==0])
list_daisy=len([i for i in y if i==1])
list_sunflower=len([i for i in y if i==2])
list_tulip=len([ i for i in y if i==3])
list_rose=len([i for i in y if i==4])
In [ ]:
list_species=[list_dandelion,list_daisy,list_sunflower,list_tulip,list_rose]
In [ ]:
sns.set_style('whitegrid')
plt.figure(figsize=(18,10))
plt.pie(list_species,labels=categories,startangle=90,colors=['r','g','b','y','m'],autopct='%1.1f%%',explode = (0, 0.1, 0, 0,0),shadow=True)
plt.legend()
plt.show()
In [ ]:
from tensorflow.keras.utils import to_categorical
y=to_categorical(y)
In [ ]:
from sklearn.model_selection import train_test_split
x_train,x_test,y_train,y_test=train_test_split(X,y,test_size=0.2,random_state=12)
In [ ]:
model = Sequential()

model.add(Conv2D(32, (3, 3), input_shape=(150,150,3)))
model.add(Activation("relu"))
model.add(MaxPooling2D(2, 2, padding="same"))
model.add(Dropout(0.2))

model.add(Conv2D(64, (3, 3)))
model.add(Activation("relu"))
model.add(MaxPooling2D(2, 2, padding="same"))
model.add(Dropout(0.2))

model.add(Conv2D(128, (3, 3)))
model.add(Activation("relu"))
model.add(MaxPooling2D(2, 2, padding="same"))
model.add(Dropout(0.2))

model.add(Flatten())
model.add(Dense(512, activation="relu"))
model.add(Dropout(0.2))
model.add(Dense(128, activation="relu"))
model.add(Dropout(0.2))
model.add(Dense(5, activation="softmax"))
In [ ]:
epochs=5

from keras.callbacks import ReduceLROnPlateau
red_lr= ReduceLROnPlateau(monitor='val_acc',patience=3,verbose=1,factor=0.1)
In [ ]:
datagen = ImageDataGenerator(
        featurewise_center=False,  # set input mean to 0 over the dataset
        samplewise_center=False,  # set each sample mean to 0
        featurewise_std_normalization=False,  # divide inputs by std of the dataset
        samplewise_std_normalization=False,  # divide each input by its std
        zca_whitening=False,  # apply ZCA whitening
        rotation_range=10,  # randomly rotate images in the range (degrees, 0 to 180)
        zoom_range = 0.1, # Randomly zoom image 
        width_shift_range=0.2,  # randomly shift images horizontally (fraction of total width)
        height_shift_range=0.2,  # randomly shift images vertically (fraction of total height)
        horizontal_flip=True,  # randomly flip images
        vertical_flip=False)  # randomly flip images


datagen.fit(x_train)
In [ ]:
model.compile(optimizer=Adam(lr=0.001),loss='categorical_crossentropy',metrics=['accuracy'])
WARNING:absl:`lr` is deprecated in Keras optimizer, please use `learning_rate` or use the legacy optimizer, e.g.,tf.keras.optimizers.legacy.Adam.
In [ ]:
model.summary()
Model: "sequential_3"
_________________________________________________________________
_________________________________________________________________
 Layer (type)                Output Shape              Param #   
=================================================================
 conv2d_9 (Conv2D)           (None, 148, 148, 32)      896       
                                                                 
 activation_3 (Activation)   (None, 148, 148, 32)      0         
                                                                 
 max_pooling2d_9 (MaxPooling  (None, 74, 74, 32)       0         
 2D)                                                             
                                                                 
 dropout_7 (Dropout)         (None, 74, 74, 32)        0         
                                                                 
 conv2d_10 (Conv2D)          (None, 72, 72, 64)        18496     
                                                                 
 activation_4 (Activation)   (None, 72, 72, 64)        0         
                                                                 
 max_pooling2d_10 (MaxPoolin  (None, 36, 36, 64)       0         
 g2D)                                                            
                                                                 
 dropout_8 (Dropout)         (None, 36, 36, 64)        0         
                                                                 
 conv2d_11 (Conv2D)          (None, 34, 34, 128)       73856     
                                                                 
 activation_5 (Activation)   (None, 34, 34, 128)       0         
                                                                 
 max_pooling2d_11 (MaxPoolin  (None, 17, 17, 128)      0         
 g2D)                                                            
                                                                 
 dropout_9 (Dropout)         (None, 17, 17, 128)       0         
                                                                 
 flatten_3 (Flatten)         (None, 36992)             0         
                                                                 
 dense_5 (Dense)             (None, 512)               18940416  
                                                                 
 dropout_10 (Dropout)        (None, 512)               0         
                                                                 
 dense_6 (Dense)             (None, 128)               65664     
                                                                 
 dropout_11 (Dropout)        (None, 128)               0         
                                                                 
 dense_7 (Dense)             (None, 5)                 645       
                                                                 
=================================================================
Total params: 19,099,973
Trainable params: 19,099,973
Non-trainable params: 0
_________________________________________________________________
In [ ]:
History = model.fit_generator(datagen.flow(x_train,y_train, batch_size=128),
                              epochs = epochs, validation_data = (x_test,y_test),
                              verbose = 1, steps_per_epoch=x_train.shape[0] // 128)
# model.fit(x_train,y_train,epochs=epochs,batch_size=batch_size,validation_data = (x_test,y_test))
Epoch 1/5
26/26 [==============================] - 77s 3s/step - loss: 1.8325 - accuracy: 0.2496 - val_loss: 1.4922 - val_accuracy: 0.3553
Epoch 2/5
26/26 [==============================] - 70s 3s/step - loss: 1.3188 - accuracy: 0.4038 - val_loss: 1.2045 - val_accuracy: 0.4965
Epoch 3/5
26/26 [==============================] - 68s 3s/step - loss: 1.2169 - accuracy: 0.4746 - val_loss: 1.1498 - val_accuracy: 0.5417
Epoch 4/5
26/26 [==============================] - 69s 3s/step - loss: 1.1845 - accuracy: 0.4932 - val_loss: 1.1388 - val_accuracy: 0.5255
Epoch 5/5
26/26 [==============================] - 68s 3s/step - loss: 1.0963 - accuracy: 0.5528 - val_loss: 1.0876 - val_accuracy: 0.5718
In [ ]:
sns.set_style('whitegrid')
plt.figure(figsize=(12,5))
plt.plot(History.history['loss'])
plt.plot(History.history['val_loss'])
plt.title('Model Loss')
plt.ylabel('Loss')
plt.xlabel('Epochs')
plt.legend(['train', 'test'])
plt.show()
In [ ]:
sns.set_style('whitegrid')
plt.figure(figsize=(12,5))
plt.plot(History.history['accuracy'])
plt.plot(History.history['val_accuracy'])
plt.title('Model Accuracy')
plt.ylabel('Accuracy')
plt.xlabel('Epochs')
plt.legend(['train', 'test'])
plt.show()
In [ ]:
preds=model.predict(x_test)
27/27 [==============================] - 3s 117ms/step
In [ ]:
predictions=np.argmax(preds,axis=1)
In [ ]:
correct_class=[]
incorrect_class=[]
i=0
for i in range(len(y_test)):
    if(np.argmax(y_test[i])==predictions[i]):
        correct_class.append(i)
    if(len(correct_class)==8):
        break
In [ ]:
i=0
for i in range(len(y_test)):
    
    if (np.argmax(y_test[i])!=predictions[i]):
        
        incorrect_class.append(i)
    if (len(incorrect_class)==8):
        break
In [ ]:
count=0
fig,ax=plt.subplots(4,2)
fig.set_size_inches(15,15)
for i in range (4):
    for j in range (2):
        ax[i,j].imshow(x_test[correct_class[count]])
        ax[i,j].set_title("Predicted Flower : "+ categories[predictions[correct_class[count]]] +"\n"+"Actual Flower : "+ categories[np.argmax(y_test[correct_class[count]])])
        plt.tight_layout()
        count+=1
In [ ]:
count=0
fig,ax=plt.subplots(4,2)
fig.set_size_inches(15,15)
for i in range(4):
    for j in range(2):
        if count < len(incorrect_class):  # Check if count is less than the length of incorrect_class
            ax[i,j].imshow(x_test[incorrect_class[count]])
            ax[i,j].set_title("Predicted flower : " + categories[predictions[incorrect_class[count]]] + "\n"+"Actual Flower : " +categories[np.argmax(y_test[incorrect_class[count]])])
            plt.tight_layout()
            count+=1
        else:
            break
In [ ]:
from sklearn.metrics import confusion_matrix
import seaborn as sns

# Generate confusion matrix
matrix = confusion_matrix(y_true=np.argmax(y_test, axis=1), y_pred=predictions)

# Create a heatmap
plt.figure(figsize=(10, 7))
sns.heatmap(matrix, annot=True, fmt='d', cmap='Blues', xticklabels=categories, yticklabels=categories)
plt.xlabel('Predicted')
plt.ylabel('True')
plt.title('Confusion Matrix')
plt.show()
In [ ]: